Finish merging the old domain allocation code with the buddy-allocator system.
This now just needs exporting to privileged guest OSes so they can allocate
multi-page physmem chunks; then Linux's pci_alloc_consistent() can be fixed.
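
As a rough usage sketch of the merged interface (illustrative only: the
order-2 request, the caller-side error handling, and the variable names are
assumptions, not part of this changeset):

/* Allocate a contiguous 4-page (order-2) chunk accounted to domain d;
 * passing a NULL domain yields anonymous heap pages with no accounting. */
struct pfn_info *pg = alloc_domheap_pages(d, 2);
if ( pg == NULL )
    return -ENOMEM;
/* ... use the contiguous frames pg[0] .. pg[3] ... */
free_domheap_pages(pg, 2);
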
put_page(page);
/* Relinquish all pages on the domain's allocation list. */
- spin_lock_recursive(&d->page_alloc_lock); /* may enter free_domain_page */
+ spin_lock_recursive(&d->page_alloc_lock); /* may enter free_domheap_page */
list_for_each_safe ( ent, tmp, &d->page_list )
{
page = list_entry(ent, struct pfn_info, list);
static inline struct pfn_info *alloc_shadow_page(struct mm_struct *m)
{
m->shadow_page_count++;
- return alloc_domheap_page();
+ return alloc_domheap_page(NULL);
}
void unshadow_table( unsigned long gpfn, unsigned int type )
for ( i = 0; i < nr_pages; i++ )
{
- if ( unlikely((page = alloc_domain_page(d)) == NULL) )
+ if ( unlikely((page = alloc_domheap_page(d)) == NULL) )
{
DPRINTK("Could not allocate a frame\n");
break;
#include <xen/console.h>
#include <asm/shadow.h>
#include <hypervisor-ifs/dom0_ops.h>
-#include <asm/hardirq.h>
#include <asm/domain_page.h>
/* Both these structures are protected by the tasklist_lock. */
__enter_scheduler();
}
-struct pfn_info *alloc_domain_page(struct domain *d)
-{
- struct pfn_info *page = NULL;
- unsigned long mask, pfn_stamp, cpu_stamp;
- int i;
-
- ASSERT(!in_irq());
-
- page = alloc_domheap_page();
- if ( unlikely(page == NULL) )
- return NULL;
-
- if ( (mask = page->u.free.cpu_mask) != 0 )
- {
- pfn_stamp = page->tlbflush_timestamp;
- for ( i = 0; (mask != 0) && (i < smp_num_cpus); i++ )
- {
- if ( mask & (1<<i) )
- {
- cpu_stamp = tlbflush_time[i];
- if ( !NEED_FLUSH(cpu_stamp, pfn_stamp) )
- mask &= ~(1<<i);
- }
- }
-
- if ( unlikely(mask != 0) )
- {
- flush_tlb_mask(mask);
- perfc_incrc(need_flush_tlb_flush);
- }
- }
-
- page->u.inuse.domain = d;
- page->u.inuse.type_info = 0;
- if ( d != NULL )
- {
- wmb(); /* Domain pointer must be visible before updating refcnt. */
- spin_lock(&d->page_alloc_lock);
- if ( unlikely(d->tot_pages >= d->max_pages) )
- {
- DPRINTK("Over-allocation for domain %u: %u >= %u\n",
- d->domain, d->tot_pages, d->max_pages);
- spin_unlock(&d->page_alloc_lock);
- page->u.inuse.domain = NULL;
- goto free_and_exit;
- }
- list_add_tail(&page->list, &d->page_list);
- page->u.inuse.count_info = PGC_allocated | 1;
- if ( unlikely(d->tot_pages++ == 0) )
- get_domain(d);
- spin_unlock(&d->page_alloc_lock);
- }
-
- return page;
-
- free_and_exit:
- free_domheap_page(page);
- return NULL;
-}
-
-void free_domain_page(struct pfn_info *page)
-{
- int drop_dom_ref;
- struct domain *d = page->u.inuse.domain;
-
- if ( unlikely(IS_XEN_HEAP_FRAME(page)) )
- {
- spin_lock_recursive(&d->page_alloc_lock);
- drop_dom_ref = (--d->xenheap_pages == 0);
- spin_unlock_recursive(&d->page_alloc_lock);
- }
- else
- {
- page->tlbflush_timestamp = tlbflush_clock;
- page->u.free.cpu_mask = 1 << d->processor;
-
- /* NB. May recursively lock from domain_relinquish_memory(). */
- spin_lock_recursive(&d->page_alloc_lock);
- list_del(&page->list);
- drop_dom_ref = (--d->tot_pages == 0);
- spin_unlock_recursive(&d->page_alloc_lock);
-
- page->u.inuse.count_info = 0;
-
- free_domheap_page(page);
- }
-
- if ( drop_dom_ref )
- put_domain(d);
-}
-
unsigned int alloc_new_dom_mem(struct domain *d, unsigned int kbytes)
{
unsigned int alloc_pfns, nr_pages;
/* Grow the allocation if necessary. */
for ( alloc_pfns = d->tot_pages; alloc_pfns < nr_pages; alloc_pfns++ )
{
- if ( unlikely((page=alloc_domain_page(d)) == NULL) )
+ if ( unlikely((page = alloc_domheap_page(d)) == NULL) )
{
domain_relinquish_memory(d);
return -ENOMEM;
#include <asm/page.h>
#include <xen/spinlock.h>
#include <xen/slab.h>
+#include <xen/irq.h>
/*********************
struct pfn_info *pg;
unsigned long flags;
+ if ( unlikely(order < 0) || unlikely(order > MAX_ORDER) )
+ return NULL;
+
spin_lock_irqsave(&heap_lock, flags);
/* Find smallest order which can satisfy the request. */
init_heap_pages(MEMZONE_DOM, phys_to_page(ps), (pe - ps) >> PAGE_SHIFT);
}
-struct pfn_info *alloc_domheap_pages(int order)
+struct pfn_info *alloc_domheap_pages(struct domain *d, int order)
{
- struct pfn_info *pg = alloc_heap_pages(MEMZONE_DOM, order);
+ struct pfn_info *pg;
+ unsigned long mask, flushed_mask, pfn_stamp, cpu_stamp;
+ int i, j;
+
+ ASSERT(!in_irq());
+
+ if ( unlikely((pg = alloc_heap_pages(MEMZONE_DOM, order)) == NULL) )
+ return NULL;
+
+ flushed_mask = 0;
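+ /* Flush TLBs on any CPU that may still cache a mapping of one of these
+ * frames and has not flushed since the frame was freed. */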
+ for ( i = 0; i < (1 << order); i++ )
+ {
+ pg[i].u.inuse.domain = NULL;
+ pg[i].u.inuse.type_info = 0;
+
+ if ( (mask = (pg[i].u.free.cpu_mask & ~flushed_mask)) != 0 )
+ {
+ pfn_stamp = pg[i].tlbflush_timestamp;
+ for ( j = 0; (mask != 0) && (j < smp_num_cpus); j++ )
+ {
+ if ( mask & (1<<j) )
+ {
+ cpu_stamp = tlbflush_time[j];
+ if ( !NEED_FLUSH(cpu_stamp, pfn_stamp) )
+ mask &= ~(1<<j);
+ }
+ }
+
+ if ( unlikely(mask != 0) )
+ {
+ flush_tlb_mask(mask);
+ perfc_incrc(need_flush_tlb_flush);
+ flushed_mask |= mask;
+ }
+ }
+ }
+
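+ /* No owner: anonymous heap pages need no per-domain accounting. */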
+ if ( d == NULL )
+ return pg;
+
+ spin_lock(&d->page_alloc_lock);
+
+ if ( unlikely((d->tot_pages + (1 << order)) > d->max_pages) )
+ {
+ DPRINTK("Over-allocation for domain %u: %u > %u\n",
+ d->domain, d->tot_pages + (1 << order), d->max_pages);
+ spin_unlock(&d->page_alloc_lock);
+ free_heap_pages(MEMZONE_DOM, pg, order);
+ return NULL;
+ }
+
+ if ( unlikely(d->tot_pages == 0) )
+ get_domain(d);
+
+ d->tot_pages += 1 << order;
+
+ for ( i = 0; i < (1 << order); i++ )
+ {
+ pg[i].u.inuse.domain = d;
+ wmb(); /* Domain pointer must be visible before updating refcnt. */
+ pg[i].u.inuse.count_info = PGC_allocated | 1;
+ list_add_tail(&pg[i].list, &d->page_list);
+ }
+
+ spin_unlock(&d->page_alloc_lock);
+
return pg;
}
void free_domheap_pages(struct pfn_info *pg, int order)
{
- free_heap_pages(MEMZONE_DOM, pg, order);
+ int i, drop_dom_ref;
+ struct domain *d = pg->u.inuse.domain;
+
+ if ( unlikely(IS_XEN_HEAP_FRAME(pg)) )
+ {
+ spin_lock_recursive(&d->page_alloc_lock);
+ d->xenheap_pages -= 1 << order;
+ drop_dom_ref = (d->xenheap_pages == 0);
+ spin_unlock_recursive(&d->page_alloc_lock);
+ }
+ else
+ {
+ /* NB. May recursively lock from domain_relinquish_memory(). */
+ spin_lock_recursive(&d->page_alloc_lock);
+
+ for ( i = 0; i < (1 << order); i++ )
+ {
+ pg[i].tlbflush_timestamp = tlbflush_clock;
+ pg[i].u.inuse.count_info = 0;
+ pg[i].u.free.cpu_mask = 1 << d->processor;
+ list_del(&pg[i].list);
+ }
+
+ d->tot_pages -= 1 << order;
+ drop_dom_ref = (d->tot_pages == 0);
+
+ spin_unlock_recursive(&d->page_alloc_lock);
+
+ free_heap_pages(MEMZONE_DOM, pg, order);
+ }
+
+ if ( drop_dom_ref )
+ put_domain(d);
}
unsigned long avail_domheap_pages(void)
{
return avail[MEMZONE_DOM];
}
+
extern unsigned long max_page;
void init_frametable(void *frametable_vstart, unsigned long nr_pages);
-struct pfn_info *alloc_domain_page(struct domain *d);
-void free_domain_page(struct pfn_info *page);
-
int alloc_page_type(struct pfn_info *page, unsigned int type);
void free_page_type(struct pfn_info *page, unsigned int type);
while ( unlikely((y = cmpxchg(&page->u.inuse.count_info, x, nx)) != x) );
if ( unlikely((nx & PGC_count_mask) == 0) )
- free_domain_page(page);
+ free_domheap_page(page);
}
#ifndef __XEN_MM_H__
#define __XEN_MM_H__
-#include <asm/mm.h>
+struct domain;
+struct pfn_info;
/* Generic allocator */
unsigned long init_heap_allocator(
/* Domain suballocator */
void init_domheap_pages(unsigned long ps, unsigned long pe);
-struct pfn_info *alloc_domheap_pages(int order);
+struct pfn_info *alloc_domheap_pages(struct domain *d, int order);
void free_domheap_pages(struct pfn_info *pg, int order);
unsigned long avail_domheap_pages(void);
-#define alloc_domheap_page() (alloc_domheap_pages(0))
+#define alloc_domheap_page(_d) (alloc_domheap_pages(_d,0))
#define free_domheap_page(_p) (free_domheap_pages(_p,0))
+#include <asm/mm.h>
+
#endif /* __XEN_MM_H__ */